In this report, I demonstrate the process we used to test for change points in white-tailed deer movement metrics. The approach used here is based on methods in Barrile et al. (2024).
For data ownership reasons, the data is not publicly available. However, I will preview the data structure so others can adapt this approach for themselves. Readers interested in data access should contact Daniel Storm at the Wisconsin Department of Natural Resources.
We’ll start by loading the packages we’ll need. I also always set my seed as standard practice (this makes randomization reproducible).
##### Clear Environment #####
# NOTE(review): rm(list = ls()) removes objects but not attached packages or
# options; restarting the R session is the more reliable way to start clean.
remove(list=ls())
#### set seed ####
# fixed seed so any randomization below is reproducible
set.seed(48965)
#### load libraries ####
library(adehabitatLT) # animal movement trajectory tools
library(ggplot2)      # plotting
library(plyr)         # ddply(); loaded before dplyr so dplyr verbs are not masked
library(dplyr)        # data manipulation
library(lubridate)    # date/time handling
library(amt)          # animal movement tools
library(ggpubr)       # ggarrange() for multi-panel figures
library(tidyr)        # nest()/unnest()
library(mcp)          # Bayesian change point models
The actual changepoint model fitting is fairly slow and resource intensive, so I did this using high throughput resources at the University of Wisconsin-Madison Center for High Throughput Computing. For completeness, I’ve included the R script used in this high throughput environment here, but I will just read in the output from these runs below.
#!/usr/bin/env Rscript
## HT_movement_cp.R
## Fits one mcp change point model per high-throughput job; the job index
## (first command-line argument) selects a row of the `runs` table built below
## (metric x model x class x prior combination).
### Clear Environment ###
remove(list=ls())
#### load libraries ####
library(adehabitatLT)
library(ggplot2)
library(plyr)      # ddply(); loaded before dplyr so dplyr verbs are not masked
library(dplyr)
library(lubridate)
library(amt)
library(tidyr)     # nest()/unnest()
library(mcp)       # Bayesian change point models
#### load custom functions ####
matching.days <- function(temp){
  ## Keep only the days on which both members of a case-control pair have data.
  ## ddply() tallies rows per day; a day with exactly two rows has one
  ## observation from each animal in the pair.
  obs.per.day <- ddply(temp, .(day), nrow)
  complete.days <- obs.per.day[obs.per.day$V1 == 2, "day"]
  temp[temp$day %in% complete.days, ]
}
## slight variation on function used in conditional logistic regression; specific to changepoint data prep
matching.days_timenumeric <- function(temp){
  ## Keep only time steps (time_numeric) present for both members of a pair.
  ## Same pairing logic as matching.days(), but on the numeric time axis
  ## used by the change point models.
  obs.per.time <- ddply(temp, .(time_numeric), nrow)
  complete.times <- obs.per.time[obs.per.time$V1 == 2, "time_numeric"]
  temp[temp$time_numeric %in% complete.times, ]
}
matching.wks <- function(temp){
  ## Keep only weeks-pre-death (wpd) values present for both pair members.
  obs.per.wk <- ddply(temp, .(wpd), nrow)
  complete.wks <- obs.per.wk[obs.per.wk$V1 == 2, "wpd"]
  temp[temp$wpd %in% complete.wks, ]
}
## Upper outlier threshold for a numeric vector: mean + 3 standard deviations,
## ignoring NAs. Values at or above the returned scalar are treated as
## outliers by the filtering code below.
## (Fix: spell out TRUE rather than the reassignable shorthand T.)
outlier.cutoff <- function(x){
  mean(x, na.rm = TRUE) + (3 * sd(x, na.rm = TRUE))
}
#### set index ####
## for running in parallel via high throughput
## BUG FIX: the bracket was misplaced — commandArgs(TRUE[1]) evaluates to
## commandArgs(TRUE), i.e. ALL trailing arguments, not the first one. We want
## only the first trailing argument. The + 1 converts the job index to a
## 1-based row index (job indices presumably start at 0 — TODO confirm with
## the submit file).
z <- as.numeric(commandArgs(trailingOnly = TRUE)[1]) + 1
#### set seed ####
## offset the seed by the job index so parallel runs use different streams
set.seed(48965 + z)
#### MCMC run parameters ####
adapt.n <- 22000 # adaptation iterations
iter.n <- 10000  # posterior sampling iterations
#### LOAD DATA ####
## load movement metrics
## get(load(...)) returns the object stored in the .Rdata file regardless of
## the name it was saved under
dat <- get(load("case_control_movement_metrics.Rdata"))
daily.dat <- dat$daily.metrics ## daily movement metrics
wkly.od.areas <- dat$wkly.od.areas ## weekly range areas
wkly.od.areas$start.date <- as.Date(wkly.od.areas$start.date, tz = "Canada/Saskatchewan") ## ignore Daylight Saving Time
wkly.od.areas$end.date <- as.Date(wkly.od.areas$end.date, tz = "Canada/Saskatchewan")
## load metadata
data <- get(load("case_control_collar_and_metadata.Rdata"))
## case-control metadata
meta <- data$meta.data
rm(data)
#### prep model data ####
## only days with at least two observations
## (presumably because the daily metrics are undefined from a single GPS
## fix — TODO confirm against the metric-derivation script)
model.dat <- daily.dat[daily.dat$daily.obs>=2,]
model.dat$day <- as.Date(model.dat$day, tz = "Canada/Saskatchewan")
#### process dataset for each metric ####
### loop through metrics
## compute all cutoffs up front, then filter the data metric by metric
metrics <- c("mean.km_p_hr", "daily.tort", "mean.disp_m")
cutoffs <- apply(model.dat[,c("mean.km_p_hr", "daily.tort", "mean.disp_m")], 2, outlier.cutoff)
## keep a pre-filtering copy of the data
model.dat.pre <- model.dat
for(metric in metrics){
  metric.vals <- model.dat[[metric]]
  metric.cutoff <- cutoffs[[metric]]
  ## keep rows below the cutoff; NA values are retained rather than dropped
  model.dat <- model.dat[which(metric.vals < metric.cutoff | is.na(metric.vals)), ]
}
## only days where case and control both have data
## nest by pair, keep matched days within each pair, then unnest
pairs <- model.dat %>% nest(data = c(-"pair.id"))
pairs <- pairs %>%
  mutate(matched.days = lapply(data, matching.days))
model.dat.paired <- pairs %>%
  ## FIX: call dplyr::select directly instead of relying on amt's re-export
  ## of the dplyr generic (consistent with the explicit dplyr:: calls below)
  dplyr::select(pair.id, matched.days) %>% unnest(cols = matched.days)
#### WEEKLY DATA ####
## exclude outliers ##
od.cutoff <- outlier.cutoff(wkly.od.areas$wkly.area.km2)
# od.cutoff
wkly.od.areas <- subset(wkly.od.areas, wkly.od.areas$wkly.area.km2<od.cutoff)
## only weeks where case and control both have data ##
wkpairs <- wkly.od.areas %>% nest(data = c(-"pair.id"))
wkpairs <- wkpairs %>%
  mutate(matched.wks = lapply(data, matching.wks))
model.dat.wkpaired <- wkpairs %>%
  ## FIX: call dplyr::select directly instead of relying on amt's re-export
  ## of the dplyr generic (consistent with the explicit dplyr:: calls below)
  dplyr::select(pair.id, matched.wks) %>% unnest(cols = matched.wks)
## table of high-throughput runs: one row per metric x model x class x prior
runs <- expand.grid(covars=c(metrics, "wkly.area.km2"),
mods = 0:1,
class = c("case", "control"),
prior = c(FALSE, TRUE)
)
## the null model (mods == 0) is never fit with an informative prior
runs <- runs[!(runs$mods==0 & runs$prior),]
## which covariate in this run? Transform response variable and set priors
## Each branch subsets the relevant columns, transforms the response toward
## normality, and sets a truncated-normal prior on the change point location
## (MINX/MAXX are mcp placeholders for the observed range of x — see the mcp
## priors documentation).
if(runs$covars[z]=="mean.km_p_hr"){
df_cov <- model.dat.paired[,c("id", "pair.id", "class", "mpd", "day", "mean.km_p_hr")]
## square-root transform of daily movement rate
df_cov$resp <- sqrt(df_cov$mean.km_p_hr)
prior <- list(cp_1 = "dnorm(120, 15) T(MINX, MAXX)")
}else if(runs$covars[z]=="daily.tort"){
df_cov <- model.dat.paired[,c("id", "pair.id", "class", "mpd", "day", "daily.tort")]
## scale, then shift above zero so the log is defined
df_cov$daily.tort_sc <- scale(df_cov$daily.tort)[,1]
## NOTE(review): min() without na.rm = TRUE returns NA if daily.tort_sc
## contains NAs — the outlier filter above keeps NA rows, so confirm that
## daily.tort has no NAs here
constant <- abs(min(df_cov$daily.tort_sc)) + 0.01
df_cov$resp <- log(df_cov$daily.tort_sc+constant)
prior <- list(cp_1 = "dnorm(120, 15) T(MINX, MAXX)")
}else if(runs$covars[z]=="mean.disp_m"){
df_cov <- model.dat.paired[,c("id", "pair.id", "class", "mpd", "day", "mean.disp_m")]
df_cov$resp <- log(df_cov$mean.disp_m)
prior <- list(cp_1 = "dnorm(120, 15) T(MINX, MAXX)")
}else if(runs$covars[z]=="wkly.area.km2"){
## weekly data use weeks-pre-death (wpd) and the week start date
df_cov <- model.dat.wkpaired[,c("id", "pair.id", "class", "wpd", "start.date", "wkly.area.km2")]
df_cov$resp <- log10(df_cov$wkly.area.km2)
## rename start.date to day so downstream code is metric-agnostic
colnames(df_cov)[colnames(df_cov)=="start.date"] <- "day"
## the weekly time axis is much shorter, hence the different prior scale
prior <- list(cp_1 = "dnorm(4, 2) T(MINX, MAXX)")
}
# order data by id and date
df_cov <- df_cov %>% arrange(id, day)
# make sure id is a factor with levels built from the observed values
# (FIX: the original converted three times — as.factor(as.character()) then
# factor() — a single factor(as.character()) call produces the same result)
df_cov$id <- factor(as.character(df_cov$id))
if(runs$covars[z] != "wkly.area.km2"){
## add days pre-death for daily datasets
ids <- unique(df_cov$id)
df.new <- NULL
for(j in 1:length(ids)){
td <- df_cov[df_cov$id==ids[j],]
class <- unique(td$class)
## add "time pre-death" to tvRSF results
## cases are indexed by cand.case in the metadata, controls by lowtag
if(class=="case"){
tm <- meta[meta$cand.case==ids[j],]
}else{
tm <- meta[meta$lowtag==ids[j],]
}
end.date <- as.Date(tm$case_mort.date, tz = "Canada/Saskatchewan")
## anchor the mortality month-day in the year of this animal's last fix;
## map Feb 29 to Feb 28 so the date stays valid in non-leap years
year <- format(max(td$day, na.rm = T), "%Y")
end.day <- format(end.date, "%m-%d")
if(end.day=="02-29"){
end.day <- "02-28"
}
end.date <- as.Date(paste0(year, "-", end.day), tz = "Canada/Saskatchewan")
## if the anchored date precedes the last fix, the death fell in the
## following calendar year — roll the year forward
if(end.date<max(td$day)){
end.date <- as.Date(paste0(as.numeric(year)+1, "-", end.day), tz = "Canada/Saskatchewan")
}
td$time_pre_death_days <- as.numeric(difftime(end.date, td$day, units = "days"))
td$time_numeric <- 180-td$time_pre_death_days # "reverse" of "days pre-death" so 180 = death day
## sanity check: days pre-death should fall roughly within [-1, 190]
if(any(na.omit(abs(td$time_pre_death_days))>190) | any(na.omit(td$time_pre_death_days)<(-1))){
stop("Days pre-death is wrong somewhere!")
}
## NOTE(review): growing df.new with rbind() in a loop is O(n^2); fine at
## this scale, but a list of data frames + bind_rows() would scale better
df.new <- rbind(df.new, td)
}
}else{
df.new <- df_cov
# "reverse" of "weeks pre-death" so 26 = death week
df.new$time_numeric <- 26-df.new$wpd
}
df_cov <- df.new
## case or control?
## restrict to the animal class (case or control) assigned to this run
df_mod <- df_cov[df_cov$class==runs$class[z],]
# order dataframe by individual, then by time
dfg <- arrange(df_mod, id, time_numeric)
# reshape for mcp: predictor x = time_numeric, response y = resp
dfs <- as.data.frame(
  dplyr::rename(
    dplyr::select(dfg, id, resp, time_numeric),
    x = time_numeric, y = resp
  )
)
# test the two hypotheses (null/no change vs gradual change)
# The four original branches differed only in (a) the second segment formula
# and (b) whether an informative prior was supplied; build the mcp() call
# once from those two decisions instead of duplicating it four times.
if(runs$mods[z]==0){
  # no behavioral change
  modelf <- list(
    y ~ 1, # intercept flat
    1 + (1|id) ~ 0 # joined intercept, varying by id
  )
  # the null model has no x term, so mcp needs par_x to identify the x axis
  mcp.args <- list(modelf, data = dfs, par_x = "x", adapt = adapt.n, iter = iter.n)
}else if(runs$mods[z]==1){
  # plateau then slope (gradual change in behavior as death approaches)
  modelf <- list(
    y ~ 1, # intercept flat
    1 + (1|id) ~ 0 + x # joined slope, varying by id
  )
  mcp.args <- list(modelf, data = dfs, adapt = adapt.n, iter = iter.n)
}
if(runs$prior[z]){
  ## informative-prior runs supply the truncated-normal change point prior
  mcp.args$prior <- prior
}
fitm <- do.call(mcp, mcp.args)
# extract loo to see which model is preferred
fitm$loo <- loo(fitm)
## save results to assess on local machine
out.name <- paste0("move_mcp_mod", runs$mods[z], "_", runs$covars[z], "_", runs$class[z], "_prior", runs$prior[z], ".Rdata")
save(fitm, file = out.name)
Now we can load the changepoint models, which were fit using high throughput computing resources. I evaluated the changepoint models the same way for all movement metrics; for simplicity, I’ll just show this process for movement rate. We start by reading in the changepoint models for cases and controls - these include a null model, an alternative hypothesis model with uninformative priors, and an alternative model with informative priors. We then use LOO to compare the three models for each of cases and controls.
## read in the fitted mcp models (get(load(...)) returns the saved object):
## fit1 = null model (mod0), fit2 = gradual change with uninformative priors
## (mod1, priorFALSE), fit3 = gradual change with informative priors (mod1, priorTRUE)
fit1_case <- get(load("../Project_data/move_mcp_mods/move_mcp_mod0_mean.km_p_hr_case_priorFALSE.Rdata"))
fit2_case <- get(load("../Project_data/move_mcp_mods/move_mcp_mod1_mean.km_p_hr_case_priorFALSE.Rdata"))
fit3_case <- get(load("../Project_data/move_mcp_mods/move_mcp_mod1_mean.km_p_hr_case_priorTRUE.Rdata"))
fit1_control <- get(load("../Project_data/move_mcp_mods/move_mcp_mod0_mean.km_p_hr_control_priorFALSE.Rdata"))
fit2_control <- get(load("../Project_data/move_mcp_mods/move_mcp_mod1_mean.km_p_hr_control_priorFALSE.Rdata"))
fit3_control <- get(load("../Project_data/move_mcp_mods/move_mcp_mod1_mean.km_p_hr_control_priorTRUE.Rdata"))
## compare the three models for cases, then for controls, with LOO
## FIX: typo — fit3_case$lo only worked via R's partial matching of `$` to
## the `loo` element; spell it out as fit3_case$loo
loo::loo_compare(fit1_case$loo, fit2_case$loo, fit3_case$loo)
## elpd_diff se_diff
## model2 0.0 0.0
## model3 -13.3 3.8
## model1 -476.4 27.4
loo::loo_compare(fit1_control$loo, fit2_control$loo, fit3_control$loo)
## elpd_diff se_diff
## model2 0.0 0.0
## model3 -126.0 18.7
## model1 -311.4 22.4
For both cases and controls, model 2 (gradual change with uninformative priors) is the preferred model.
Let’s look at the posteriors and check our Rhats and other diagnostics. We’ll start with the cases:
### cases ###
fit1_case$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3344.6 33.2
## p_loo 1.9 0.1
## looic -6689.2 66.5
## ------
## MCSE of elpd_loo is 0.0.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
fit2_case$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3821.0 37.6
## p_loo 27.1 1.3
## looic -7642.0 75.2
## ------
## MCSE of elpd_loo is 0.0.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
fit3_case$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3807.7 37.6
## p_loo 37.3 2.7
## looic -7615.5 75.2
## ------
## MCSE of elpd_loo is 0.5.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
# examine posterior fit of top model
# (plotted for all three fits so they can be compared visually)
plot(fit1_case, q_fit = TRUE)
plot(fit2_case, q_fit = TRUE)
plot(fit3_case, q_fit = TRUE)
#Gelman-Rubin convergence diagnostic (check Rhats)
# Rhat near 1 indicates the chains mixed; n.eff is the effective sample size
summary(fit1_case)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 90.073 5.8022 176.150 1 30000
## cp_1_sd 288.541 0.0049 707.324 1 30277
## int_1 0.183 0.1803 0.185 1 18834
## sigma_1 0.065 0.0629 0.066 1 19491
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
summary(fit2_case)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0 + x
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 2.4e+00 9.9e-05 6.3e+00 1 251
## cp_1_sd 4.2e+02 1.3e+02 8.0e+02 1 11940
## int_1 2.3e-01 2.3e-01 2.3e-01 1 3117
## sigma_1 5.3e-02 5.2e-02 5.5e-02 1 17063
## x_2 -5.1e-04 -5.5e-04 -4.8e-04 1 3361
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
summary(fit3_case)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0 + x
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 2.3e+01 8.4844 40.7636 1.1 25
## cp_1_sd 4.2e+02 119.7280 806.1771 1.0 11745
## int_1 2.2e-01 0.2188 0.2303 1.1 142
## sigma_1 5.3e-02 0.0519 0.0549 1.0 15007
## x_2 -5.5e-04 -0.0006 -0.0005 1.1 313
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
# posterior check
# pp_check() overlays posterior predictive draws on the observed response
pp_check(fit1_case)
pp_check(fit2_case)
pp_check(fit3_case)
# change point posteriors
plot_pars(fit1_case)
plot_pars(fit2_case)
plot_pars(fit3_case)
And then we’ll repeat the same process for controls…
### controls ###
fit1_control$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3215.8 31.1
## p_loo 1.8 0.1
## looic -6431.6 62.2
## ------
## MCSE of elpd_loo is 0.0.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
fit2_control$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3527.2 34.8
## p_loo 21.5 1.0
## looic -7054.3 69.6
## ------
## MCSE of elpd_loo is 0.1.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
fit3_control$loo
##
## Computed from 30000 by 2533 log-likelihood matrix.
##
## Estimate SE
## elpd_loo 3401.2 33.4
## p_loo 181.3 7.1
## looic -6802.4 66.7
## ------
## MCSE of elpd_loo is 10.4.
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
# examine posterior fit of top model
# (plotted for all three fits so they can be compared visually)
plot(fit1_control, q_fit = TRUE)
plot(fit2_control, q_fit = TRUE)
plot(fit3_control, q_fit = TRUE)
#Gelman-Rubin convergence diagnostic (check Rhats)
# NOTE(review): the fit3_control summary below shows very large Rhats
# (up to ~32), so the informative-prior control model did not converge
summary(fit1_control)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 89.639 2.6068 173.00 1 31266
## cp_1_sd 288.280 0.0071 713.23 1 28971
## int_1 0.213 0.2104 0.22 1 18574
## sigma_1 0.068 0.0661 0.07 1 18344
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
summary(fit2_control)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0 + x
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 5.1e+00 5.9e-04 1.4e+01 1 80
## cp_1_sd 5.0e+02 1.7e+02 8.7e+02 1 13527
## int_1 1.7e-01 1.6e-01 1.7e-01 1 1552
## sigma_1 6.0e-02 5.8e-02 6.2e-02 1 17782
## x_2 4.2e-04 3.9e-04 4.6e-04 1 2352
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
summary(fit3_control)
## Family: gaussian(link = 'identity')
## Iterations: 30000 from 3 chains.
## Segments:
## 1: y ~ 1
## 2: y ~ 1 + (1 | id) ~ 0 + x
##
## Population-level parameters:
## name mean lower upper Rhat n.eff
## cp_1 4.8e+01 1.8e+01 6.7e+01 2.4 32
## cp_1_sd 4.6e+02 1.4e+02 8.5e+02 1.0 12796
## int_1 2.2e-01 1.7e-01 2.4e-01 21.2 442
## sigma_1 6.1e-02 5.9e-02 6.3e-02 1.6 15051
## x_2 -1.8e-04 -5.5e-04 4.9e-04 31.7 524
##
## Use `ranef(fit)` to summarise the varying effect(s): cp_1_id
# posterior check
# pp_check() overlays posterior predictive draws on the observed response
pp_check(fit1_control)
pp_check(fit2_control)
pp_check(fit3_control)
# change point posteriors
plot_pars(fit1_control)
plot_pars(fit2_control)
plot_pars(fit3_control)
We can then save our top models (model 2 for both cases and controls), and proceed with making some plots of our results.
# save models as objects for later plotting
# model 2 (gradual change, uninformative priors) was preferred by LOO for both
rate_case <- fit2_case
rate_control <- fit2_control
Now we can make a plot of our movement rate changepoint results:
## Case panel: posterior fit of the gradual-change model for CWD cases.
## The five separate theme() calls of the original are consolidated into one;
## ggplot2 merges theme elements property-by-property when themes are added,
## so setting size/color/margin together here yields the identical theme.
rate.a <- plot(rate_case, geom_data = FALSE, q_fit = TRUE, nsamples = 2000)+
  geom_hline(yintercept = 0, color = "gray", linetype = "dashed") +
  coord_cartesian(ylim = c(0.12, 0.26), expand = TRUE)+
  scale_y_continuous(limits = c(0.12, 0.26), breaks = seq(0.15, 0.26, 0.05), labels = seq(0.15, 0.26, 0.05)) +
  ## x axis counts down in months toward death (x = 181 labeled "Death")
  scale_x_continuous(breaks=c(0,30,60,90,120,150,181), limits=c(0,183), labels = c(6,5,4,3,2,1,"Death"))+
  xlab("Months prior to case death") +
  ylab("Transformed movement rate") +
  theme_bw() +
  theme(
    panel.background = element_rect(colour = "black", linewidth=1, linetype = "solid"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.ticks.length = unit(0.2,"cm"),
    axis.title.y = element_text(size = 16, color = "black", margin = ggplot2::margin(r = 5)),
    axis.title.x = element_text(size = 16, color = "black", margin = ggplot2::margin(t = 10)),
    axis.text.x = element_text(size = 12, color = "black"),
    axis.text.y = element_text(size = 12, color = "black"),
    legend.position = "none",
    legend.title = element_blank()) +
  ggplot2::annotate("text", x = 90, y = 0.25, label = "CWD cases", size = 5, color = "black")
## Control panel: same layout as the case panel, but with the y-axis title
## suppressed (the panels share one) and a "Controls" label.
## Redundant theme() calls consolidated as in the case panel; ggplot2 merges
## theme elements property-by-property, so the result is identical.
rate.b <- plot(rate_control, geom_data = FALSE, q_fit = TRUE, nsamples = 2000)+
  geom_hline(yintercept = 0, color = "gray", linetype = "dashed") +
  coord_cartesian(ylim = c(0.12, 0.26), expand = TRUE)+
  scale_y_continuous(limits = c(0.12, 0.26), breaks = seq(0.15, 0.26, 0.05), labels = seq(0.15, 0.26, 0.05)) +
  scale_x_continuous(breaks=c(0,30,60,90,120,150,181), limits=c(0,183), labels = c(6,5,4,3,2,1,"Death"))+
  xlab("Months prior to case death") +
  ylab("Transformed movement rate") +
  theme_bw() +
  theme(
    panel.background = element_rect(colour = "black", linewidth=1, linetype = "solid"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.ticks.length = unit(0.2,"cm"),
    axis.title.y = element_blank(),
    axis.title.x = element_text(size = 16, color = "black", margin = ggplot2::margin(t = 10)),
    axis.text.x = element_text(size = 12, color = "black"),
    axis.text.y = element_text(size = 12, color = "black"),
    legend.position = "none",
    legend.title = element_blank()) +
  ggplot2::annotate("text", x = 90, y = 0.25, label = "Controls", size = 5, color = "black")
# arrange the two panels side by side with panel labels A and B
rate.plot <- ggarrange(rate.a, rate.b, labels = c("A", "B"), nrow = 1)
rate.plot
## can save as a jpeg
# ggsave("../Figures/movemcp_plots/rates.jpg", rate.plot, width = 12, height = 5, units = "in", dpi = 300)
And that’s it! I’ll close with my session info:
sessionInfo()
## R version 4.4.0 (2024-04-24)
## Platform: aarch64-apple-darwin20
## Running under: macOS 15.5
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRlapack.dylib; LAPACK version 3.12.0
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## time zone: America/Chicago
## tzcode source: internal
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] mcp_0.3.4 tidyr_1.3.1 ggpubr_0.6.0
## [4] amt_0.3.0.0 lubridate_1.9.3 dplyr_1.1.4
## [7] plyr_1.8.9 ggplot2_3.5.1 adehabitatLT_0.3.27
## [10] CircStats_0.2-6 boot_1.3-30 MASS_7.3-60.2
## [13] adehabitatMA_0.3.16 ade4_1.7-22 sp_2.1-4
##
## loaded via a namespace (and not attached):
## [1] tidyselect_1.2.1 svUnit_1.0.6 farver_2.1.1
## [4] loo_2.8.0 tidybayes_3.0.6 fastmap_1.2.0
## [7] tensorA_0.36.2.1 digest_0.6.35 timechange_0.3.0
## [10] lifecycle_1.0.4 sf_1.0-16 survival_3.5-8
## [13] magrittr_2.0.3 posterior_1.5.0 compiler_4.4.0
## [16] rlang_1.1.3 sass_0.4.9 tools_4.4.0
## [19] utf8_1.2.4 yaml_2.3.8 knitr_1.46
## [22] ggsignif_0.6.4 labeling_0.4.3 classInt_0.4-10
## [25] abind_1.4-5 KernSmooth_2.23-22 withr_3.0.0
## [28] purrr_1.0.2 grid_4.4.0 fansi_1.0.6
## [31] e1071_1.7-14 colorspace_2.1-0 scales_1.3.0
## [34] cli_3.6.2 rmarkdown_2.28 generics_0.1.3
## [37] rstudioapi_0.16.0 reshape2_1.4.4 DBI_1.2.2
## [40] cachem_1.0.8 proxy_0.4-27 stringr_1.5.1
## [43] splines_4.4.0 bayesplot_1.11.1 parallel_4.4.0
## [46] matrixStats_1.3.0 vctrs_0.6.5 Matrix_1.7-0
## [49] jsonlite_1.8.8 carData_3.0-5 car_3.1-2
## [52] patchwork_1.3.0 arrayhelpers_1.1-0 rstatix_0.7.2
## [55] ggdist_3.3.2 jquerylib_0.1.4 units_0.8-5
## [58] glue_1.7.0 cowplot_1.1.3 distributional_0.4.0
## [61] stringi_1.8.3 gtable_0.3.5 munsell_0.5.1
## [64] tibble_3.2.1 pillar_1.9.0 htmltools_0.5.8.1
## [67] R6_2.5.1 Rdpack_2.6 evaluate_0.23
## [70] lattice_0.22-6 highr_0.10 rbibutils_2.2.16
## [73] backports_1.4.1 broom_1.0.5 bslib_0.7.0
## [76] class_7.3-22 Rcpp_1.0.12 coda_0.19-4.1
## [79] checkmate_2.3.1 xfun_0.43 pkgconfig_2.0.3